#define PREFIX VTDPREFIX "ACPI DMAR:"
#define DEBUG
-#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + sizeof(struct acpi_dev_scope))
+#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + \
+ sizeof(struct acpi_dev_scope))
LIST_HEAD(acpi_drhd_units);
LIST_HEAD(acpi_rmrr_units);
* add INCLUDE_ALL at the tail, so scan the list will find it at
* the very end.
*/
- if (drhd->include_all)
+ if ( drhd->include_all )
list_add_tail(&drhd->list, &acpi_drhd_units);
else
list_add(&drhd->list, &acpi_drhd_units);
}
static int acpi_pci_device_match(struct pci_dev *devices, int cnt,
- struct pci_dev *dev)
+ struct pci_dev *dev)
{
int i;
- for (i = 0; i < cnt; i++) {
- if ((dev->bus == devices->bus) &&
- (dev->devfn == devices->devfn))
+ for ( i = 0; i < cnt; i++ )
+ {
+ if ( (dev->bus == devices->bus) &&
+ (dev->devfn == devices->devfn) )
return 1;
devices++;
}
* add ALL_PORTS at the tail, so scan the list will find it at
* the very end.
*/
- if (atsr->all_ports)
+ if ( atsr->all_ports )
list_add_tail(&atsr->list, &acpi_atsr_units);
else
list_add(&atsr->list, &acpi_atsr_units);
struct acpi_drhd_unit *include_all_drhd;
include_all_drhd = NULL;
- list_for_each_entry(drhd, &acpi_drhd_units, list) {
- if (drhd->include_all)
+ list_for_each_entry ( drhd, &acpi_drhd_units, list )
+ {
+ if ( drhd->include_all )
include_all_drhd = drhd;
- if (acpi_pci_device_match(drhd->devices,
- drhd->devices_cnt, dev))
+ if ( acpi_pci_device_match(drhd->devices,
+ drhd->devices_cnt, dev) )
{
gdprintk(XENLOG_INFO VTDPREFIX,
"acpi_find_matched_drhd_unit: drhd->address = %lx\n",
}
}
- if (include_all_drhd) {
+ if ( include_all_drhd )
+ {
gdprintk(XENLOG_INFO VTDPREFIX,
"acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n",
include_all_drhd->address);
return include_all_drhd;;
}
- return(NULL);
+ return NULL;
}
+/*
+ * Return the RMRR unit whose device scope contains @dev, or NULL if no
+ * registered RMRR covers this PCI device.  Rewritten to return directly
+ * from the loop instead of using a goto/label.
+ */
struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev)
{
struct acpi_rmrr_unit *rmrr;
- list_for_each_entry(rmrr, &acpi_rmrr_units, list) {
- if (acpi_pci_device_match(rmrr->devices,
- rmrr->devices_cnt, dev))
- goto out;
+ list_for_each_entry ( rmrr, &acpi_rmrr_units, list )
+ {
+ if ( acpi_pci_device_match(rmrr->devices,
+ rmrr->devices_cnt, dev) )
+ /* First matching unit wins. */
+ return rmrr;
}
- rmrr = NULL;
-out:
- return rmrr;
+
+ return NULL;
}
struct acpi_atsr_unit * acpi_find_matched_atsr_unit(struct pci_dev *dev)
struct acpi_atsr_unit *all_ports_atsru;
all_ports_atsru = NULL;
- list_for_each_entry(atsru, &acpi_atsr_units, list) {
- if (atsru->all_ports)
+ list_for_each_entry ( atsru, &acpi_atsr_units, list )
+ {
+ if ( atsru->all_ports )
all_ports_atsru = atsru;
- if (acpi_pci_device_match(atsru->devices, atsru->devices_cnt, dev))
+ if ( acpi_pci_device_match(atsru->devices,
+ atsru->devices_cnt, dev) )
return atsru;
}
- if (all_ports_atsru) {
- gdprintk(XENLOG_INFO VTDPREFIX,
+
+ if ( all_ports_atsru )
+ {
+ gdprintk(XENLOG_INFO VTDPREFIX,
"acpi_find_matched_atsr_unit: all_ports_atsru\n");
return all_ports_atsru;;
}
- return(NULL);
+
+ return NULL;
}
-static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt,
- struct pci_dev **devices)
+/*
+ * Count the PCI devices covered by the DMAR device-scope entries in
+ * [start, end).  Each scope's bridge path is walked via PCI config-space
+ * reads to resolve the final bus number; a P2P-bridge scope counts every
+ * device discovered behind it, while IOAPIC and MSI-HPET scopes each
+ * count as one.  Returns the device count, or -EINVAL on a malformed
+ * scope entry.  Used to size the array later filled by
+ * acpi_parse_dev_scope().
+ */
+static int scope_device_count(void *start, void *end)
{
struct acpi_dev_scope *scope;
u8 bus, sub_bus, sec_bus;
struct acpi_pci_path *path;
- struct acpi_ioapic_unit *acpi_ioapic_unit = NULL;
- int count, dev_count=0;
- struct pci_dev *pdev;
+ int depth, count = 0;
u8 dev, func;
u32 l;
- void *tmp;
- *cnt = 0;
- tmp = start;
- while (start < end) {
+ while ( start < end )
+ {
scope = start;
- if (scope->length < MIN_SCOPE_LEN ||
- (scope->dev_type != ACPI_DEV_ENDPOINT &&
- scope->dev_type != ACPI_DEV_P2PBRIDGE)) {
+ /* Entry must hold at least one acpi_dev_scope + one acpi_pci_path. */
+ if ( scope->length < MIN_SCOPE_LEN )
+ {
printk(KERN_WARNING PREFIX "Invalid device scope\n");
return -EINVAL;
}
- (*cnt)++;
- start += scope->length;
- }
- start = tmp;
- while (start < end) {
- scope = start;
path = (struct acpi_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dev_scope))
- /sizeof(struct acpi_pci_path);
bus = scope->start_bus;
-
- while (--count) {
- bus = read_pci_config_byte(bus, path->dev,
- path->fn, PCI_SECONDARY_BUS);
+ /* Walk the bridge path to resolve the device's actual bus. */
+ depth = (scope->length - sizeof(struct acpi_dev_scope))
+ / sizeof(struct acpi_pci_path);
+ while ( --depth )
+ {
+ bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SECONDARY_BUS);
path++;
}
- if (scope->dev_type == ACPI_DEV_ENDPOINT) {
- printk(KERN_WARNING PREFIX
- "found endpoint: bdf = %x:%x:%x\n", bus, path->dev, path->fn);
- dev_count++;
- } else if (scope->dev_type == ACPI_DEV_P2PBRIDGE) {
- printk(KERN_WARNING PREFIX
- "found bridge: bdf = %x:%x:%x\n", bus, path->dev, path->fn);
-
- sec_bus = read_pci_config_byte(bus, path->dev,
- path->fn, PCI_SECONDARY_BUS);
- sub_bus = read_pci_config_byte(bus, path->dev,
- path->fn, PCI_SUBORDINATE_BUS);
- while (sec_bus <= sub_bus) {
- for (dev = 0; dev < 32; dev++) {
- for (func = 0; func < 8; func++) {
- l = read_pci_config(sec_bus, dev, func, PCI_VENDOR_ID);
-
- /* some broken boards return 0 or ~0 if a slot is empty: */
- if (l == 0xffffffff || l == 0x00000000 ||
- l == 0x0000ffff || l == 0xffff0000)
+ if ( scope->dev_type == ACPI_DEV_ENDPOINT )
+ {
+ printk(KERN_INFO PREFIX
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
+ count++;
+ }
+ else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
+ {
+ printk(KERN_INFO PREFIX
+ "found bridge: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
+ /* Probe every slot on every bus behind the bridge. */
+ sec_bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SECONDARY_BUS);
+ sub_bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SUBORDINATE_BUS);
+
+ while ( sec_bus <= sub_bus )
+ {
+ for ( dev = 0; dev < 32; dev++ )
+ {
+ for ( func = 0; func < 8; func++ )
+ {
+ l = read_pci_config(
+ sec_bus, dev, func, PCI_VENDOR_ID);
+
+ /* some broken boards return 0 or
+ * ~0 if a slot is empty
+ */
+ if ( l == 0xffffffff || l == 0x00000000 ||
+ l == 0x0000ffff || l == 0xffff0000 )
+ /* NOTE(review): this also skips remaining
+ * functions of a multi-function device —
+ * matches the later fill pass, but confirm
+ * intended. */
break;
- dev_count++;
+ count++;
}
}
sec_bus++;
}
- } else if (scope->dev_type == ACPI_DEV_IOAPIC) {
- printk(KERN_WARNING PREFIX
- "found IOAPIC: bdf = %x:%x:%x\n", bus, path->dev, path->fn);
- dev_count++;
- } else {
- printk(KERN_WARNING PREFIX
- "found MSI HPET: bdf = %x:%x:%x\n", bus, path->dev, path->fn);
- dev_count++;
+ }
+ else if ( scope->dev_type == ACPI_DEV_IOAPIC )
+ {
+ /* Non-PCI scopes still occupy one device slot. */
+ printk(KERN_INFO PREFIX
+ "found IOAPIC: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
+ count++;
+ }
+ else
+ {
+ printk(KERN_INFO PREFIX
+ "found MSI HPET: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
+ count++;
}
start += scope->length;
}
- *cnt = dev_count;
+ return count;
+}
+
+static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev **devices)
+{
+ struct acpi_dev_scope *scope;
+ u8 bus, sub_bus, sec_bus;
+ struct acpi_pci_path *path;
+ struct acpi_ioapic_unit *acpi_ioapic_unit = NULL;
+ int depth;
+ struct pci_dev *pdev;
+ u8 dev, func;
+ u32 l;
+
+ *cnt = scope_device_count(start, end);
+ if ( *cnt == 0 )
+ {
+ printk(KERN_INFO PREFIX "acpi_parse_dev_scope: no device\n");
+ return 0;
+ }
+
*devices = xmalloc_array(struct pci_dev, *cnt);
- if (!*devices)
+ if ( !*devices )
return -ENOMEM;
memset(*devices, 0, sizeof(struct pci_dev) * (*cnt));
pdev = *devices;
- start = tmp;
- while (start < end) {
+ while ( start < end )
+ {
scope = start;
path = (struct acpi_pci_path *)(scope + 1);
- count = (scope->length - sizeof(struct acpi_dev_scope))
- /sizeof(struct acpi_pci_path);
+ depth = (scope->length - sizeof(struct acpi_dev_scope))
+ / sizeof(struct acpi_pci_path);
bus = scope->start_bus;
- while (--count) {
- bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SECONDARY_BUS);
+ while ( --depth )
+ {
+ bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SECONDARY_BUS);
path++;
}
- if (scope->dev_type == ACPI_DEV_ENDPOINT) {
- printk(KERN_WARNING PREFIX
- "found endpoint: bdf = %x:%x:%x\n", bus, path->dev, path->fn);
-
+ if ( scope->dev_type == ACPI_DEV_ENDPOINT )
+ {
+ printk(KERN_INFO PREFIX
+ "found endpoint: bdf = %x:%x:%x\n",
+ bus, path->dev, path->fn);
pdev->bus = bus;
pdev->devfn = PCI_DEVFN(path->dev, path->fn);
pdev++;
- } else if (scope->dev_type == ACPI_DEV_P2PBRIDGE) {
- printk(KERN_WARNING PREFIX
- "found bridge: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn);
-
- sec_bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SECONDARY_BUS);
- sub_bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SUBORDINATE_BUS);
-
- while (sec_bus <= sub_bus) {
- for (dev = 0; dev < 32; dev++) {
- for (func = 0; func < 8; func++) {
- l = read_pci_config(sec_bus, dev, func, PCI_VENDOR_ID);
-
- /* some broken boards return 0 or ~0 if a slot is empty: */
- if (l == 0xffffffff || l == 0x00000000 ||
- l == 0x0000ffff || l == 0xffff0000)
+ }
+ else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE )
+ {
+ printk(KERN_INFO PREFIX
+ "found bridge: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
+ sec_bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SECONDARY_BUS);
+ sub_bus = read_pci_config_byte(
+ bus, path->dev, path->fn, PCI_SUBORDINATE_BUS);
+
+ while ( sec_bus <= sub_bus )
+ {
+ for ( dev = 0; dev < 32; dev++ )
+ {
+ for ( func = 0; func < 8; func++ )
+ {
+ l = read_pci_config(
+ sec_bus, dev, func, PCI_VENDOR_ID);
+
+ /* some broken boards return 0 or
+ * ~0 if a slot is empty
+ */
+ if ( l == 0xffffffff || l == 0x00000000 ||
+ l == 0x0000ffff || l == 0xffff0000 )
break;
pdev->bus = sec_bus;
}
sec_bus++;
}
- } else if (scope->dev_type == ACPI_DEV_IOAPIC) {
+ }
+ else if ( scope->dev_type == ACPI_DEV_IOAPIC )
+ {
acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit);
+ if ( !acpi_ioapic_unit )
+ return -ENOMEM;
acpi_ioapic_unit->apic_id = scope->enum_id;
acpi_ioapic_unit->ioapic.bdf.bus = bus;
acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
acpi_ioapic_unit->ioapic.bdf.func = path->fn;
list_add(&acpi_ioapic_unit->list, &acpi_ioapic_units);
- printk(KERN_WARNING PREFIX
- "found IOAPIC: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn);
- } else {
- printk(KERN_WARNING PREFIX
- "found MSI HPET: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn);
+ printk(KERN_INFO PREFIX
+ "found IOAPIC: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
}
+ else
+ printk(KERN_INFO PREFIX
+ "found MSI HPET: bus = %x dev = %x func = %x\n",
+ bus, path->dev, path->fn);
start += scope->length;
}
static int include_all;
dmaru = xmalloc(struct acpi_drhd_unit);
- if (!dmaru)
+ if ( !dmaru )
return -ENOMEM;
memset(dmaru, 0, sizeof(struct acpi_drhd_unit));
dmaru->address = drhd->address;
dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */
- printk(KERN_WARNING PREFIX "dmaru->address = %lx\n", dmaru->address);
+ printk(KERN_INFO PREFIX "dmaru->address = %lx\n", dmaru->address);
- if (!dmaru->include_all) {
- ret = acpi_parse_dev_scope((void *)(drhd + 1),
- ((void *)drhd) + header->length,
- &dmaru->devices_cnt, &dmaru->devices);
- }
- else {
- printk(KERN_WARNING PREFIX "found INCLUDE_ALL\n");
+ if ( !dmaru->include_all )
+ ret = acpi_parse_dev_scope(
+ (void *)(drhd + 1),
+ ((void *)drhd) + header->length,
+ &dmaru->devices_cnt, &dmaru->devices);
+ else
+ {
+ printk(KERN_INFO PREFIX "found INCLUDE_ALL\n");
/* Only allow one INCLUDE_ALL */
- if (include_all) {
+ if ( include_all )
+ {
printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
- "device scope is allowed\n");
+ "device scope is allowed\n");
ret = -EINVAL;
}
include_all = 1;
}
- if (ret)
+ if ( ret )
xfree(dmaru);
else
acpi_register_drhd_unit(dmaru);
int ret = 0;
rmrru = xmalloc(struct acpi_rmrr_unit);
- if (!rmrru)
+ if ( !rmrru )
return -ENOMEM;
memset(rmrru, 0, sizeof(struct acpi_rmrr_unit));
-#ifdef VTD_DEBUG
- gdprintk(XENLOG_INFO VTDPREFIX,
- "acpi_parse_one_rmrr: base = %lx end = %lx\n",
- rmrr->base_address, rmrr->end_address);
-#endif
-
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
- ret = acpi_parse_dev_scope((void *)(rmrr + 1),
- ((void*)rmrr) + header->length,
- &rmrru->devices_cnt, &rmrru->devices);
+ printk(KERN_INFO PREFIX
+ "acpi_parse_one_rmrr: base=%"PRIx64" end=%"PRIx64"\n",
+ rmrr->base_address, rmrr->end_address);
+
+ ret = acpi_parse_dev_scope(
+ (void *)(rmrr + 1),
+ ((void*)rmrr) + header->length,
+ &rmrru->devices_cnt, &rmrru->devices);
- if (ret || (rmrru->devices_cnt == 0))
+ if ( ret || (rmrru->devices_cnt == 0) )
xfree(rmrru);
else
acpi_register_rmrr_unit(rmrru);
static int all_ports;
atsru = xmalloc(struct acpi_atsr_unit);
- if (!atsru)
+ if ( !atsru )
return -ENOMEM;
memset(atsru, 0, sizeof(struct acpi_atsr_unit));
atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */
- if (!atsru->all_ports) {
- ret = acpi_parse_dev_scope((void *)(atsr + 1),
- ((void *)atsr) + header->length,
- &atsru->devices_cnt, &atsru->devices);
- }
- else {
- printk(KERN_WARNING PREFIX "found ALL_PORTS\n");
+ if ( !atsru->all_ports )
+ ret = acpi_parse_dev_scope(
+ (void *)(atsr + 1),
+ ((void *)atsr) + header->length,
+ &atsru->devices_cnt, &atsru->devices);
+ else
+ {
+ printk(KERN_INFO PREFIX "found ALL_PORTS\n");
/* Only allow one ALL_PORTS */
- if (all_ports) {
+ if ( all_ports )
+ {
printk(KERN_WARNING PREFIX "Only one ALL_PORTS "
- "device scope is allowed\n");
+ "device scope is allowed\n");
ret = -EINVAL;
}
all_ports = 1;
}
- if (ret)
+ if ( ret )
xfree(atsr);
else
acpi_register_atsr_unit(atsru);
return ret;
}
-static void __init
-acpi_table_print_dmar_entry(struct acpi_dmar_entry_header *header)
-{
- struct acpi_table_drhd *drhd;
- struct acpi_table_rmrr *rmrr;
-
- switch (header->type) {
- case ACPI_DMAR_DRHD:
- drhd = (struct acpi_table_drhd *)header;
- break;
- case ACPI_DMAR_RMRR:
- rmrr = (struct acpi_table_rmrr *)header;
- break;
- }
-}
-
-static int __init
-acpi_parse_dmar(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_dmar(unsigned long phys_addr,
+ unsigned long size)
{
struct acpi_table_dmar *dmar = NULL;
struct acpi_dmar_entry_header *entry_header;
int ret = 0;
- if (!phys_addr || !size)
+ if ( !phys_addr || !size )
return -EINVAL;
dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size);
- if (!dmar) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ if ( !dmar )
+ {
+ printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
return -ENODEV;
}
- if (!dmar->haw) {
- printk (KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
+ if ( !dmar->haw )
+ {
+ printk(KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
return -EINVAL;
}
dmar_host_address_width = dmar->haw;
- printk (KERN_INFO PREFIX "Host address width %d\n",
- dmar_host_address_width);
+ printk(KERN_INFO PREFIX "Host address width %d\n",
+ dmar_host_address_width);
entry_header = (struct acpi_dmar_entry_header *)(dmar + 1);
- while (((unsigned long)entry_header) < (((unsigned long)dmar) + size)) {
- acpi_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
+ while ( ((unsigned long)entry_header) <
+ (((unsigned long)dmar) + size) )
+ {
+ switch ( entry_header->type )
+ {
case ACPI_DMAR_DRHD:
- printk (KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n");
ret = acpi_parse_one_drhd(entry_header);
break;
case ACPI_DMAR_RMRR:
- printk (KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
ret = acpi_parse_one_rmrr(entry_header);
break;
case ACPI_DMAR_ATSR:
- printk (KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
+ printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n");
ret = acpi_parse_one_atsr(entry_header);
break;
default:
ret = -EINVAL;
break;
}
- if (ret)
+ if ( ret )
break;
entry_header = ((void *)entry_header + entry_header->length);
{
int rc;
- if (!vtd_enabled)
+ if ( !vtd_enabled )
return -ENODEV;
- if ((rc = vtd_hw_check()) != 0)
+ if ( (rc = vtd_hw_check()) != 0 )
return rc;
acpi_table_parse(ACPI_DMAR, acpi_parse_dmar);
- if (list_empty(&acpi_drhd_units)) {
+ if ( list_empty(&acpi_drhd_units) )
+ {
printk(KERN_ERR PREFIX "No DMAR devices found\n");
vtd_enabled = 0;
return -ENODEV;
iommu_flush_write_buffer(iommu);
}
}
+ unmap_domain_page(pte);
}
/* clear last level pte, a tlb flush should be followed */
{
struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
struct iommu *iommu;
-
+
if ( nr_iommus > MAX_IOMMUS )
{
gdprintk(XENLOG_ERR VTDPREFIX,
set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
- dprintk(XENLOG_INFO VTDPREFIX,
+ dprintk(XENLOG_ERR VTDPREFIX,
"iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
iommu->reg, drhd->address);
nr_iommus++;
context = device_to_context_entry(iommu, bus, devfn);
if ( !context )
{
- gdprintk(XENLOG_INFO VTDPREFIX,
+ gdprintk(XENLOG_ERR VTDPREFIX,
"domain_context_mapping_one:context == NULL:"
"bdf = %x:%x:%x\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
return -ENOMEM;
}
- spin_lock_irqsave(&iommu->lock, flags);
+
if ( context_present(*context) )
{
- spin_unlock_irqrestore(&iommu->lock, flags);
gdprintk(XENLOG_INFO VTDPREFIX,
"domain_context_mapping_one:context present:bdf=%x:%x:%x\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
return 0;
}
-#ifdef VTD_DEBUG
- dprintk(XENLOG_INFO VTDPREFIX,
- "context_mapping_one_1-%x:%x:%x-*context = %lx %lx\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn), context->hi, context->lo);
-#endif
-
+ spin_lock_irqsave(&iommu->lock, flags);
/*
* domain_id 0 is not valid on Intel's IOMMU, force domain_id to
* be 1 based as required by intel's iommu hw.
context_set_address_width(*context, hd->agaw);
if ( ecap_pass_thru(iommu->ecap) )
- {
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
- }
else
{
context_set_address_root(*context, virt_to_maddr(hd->pgd));
context_set_present(*context);
iommu_flush_cache_entry(iommu, context);
-#ifdef VTD_DEBUG
- dprintk(XENLOG_INFO VTDPREFIX,
- "context_mapping_one_2-%x:%x:%x-*context=%lx %lx hd->pgd = %p\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- context->hi, context->lo, hd->pgd);
-#endif
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "context_mapping_one-%x:%x:%x-*context=%"PRIx64":%"PRIx64
+ " hd->pgd=%p\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ context->hi, context->lo, hd->pgd);
if ( iommu_flush_context_device(iommu, domain->domain_id,
- (((u16)bus) << 8) | devfn,
+ (((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
else
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
if ( pdev->bus == 0 )
- {
ret = domain_context_mapping_one(
domain, iommu, (u8)(pdev->bus), (u8)(pdev->devfn));
- }
else
{
if ( bus2bridge[pdev->bus].bus != 0 )
- gdprintk(XENLOG_ERR VTDPREFIX,
+ gdprintk(XENLOG_WARNING VTDPREFIX,
"domain_context_mapping:bus2bridge"
"[pdev->bus].bus != 0\n");
context = device_to_context_entry(iommu, bus, devfn);
if ( !context )
{
- gdprintk(XENLOG_INFO VTDPREFIX,
+ gdprintk(XENLOG_ERR VTDPREFIX,
"domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
return -ENOMEM;
}
- spin_lock_irqsave(&iommu->lock, flags);
if ( !context_present(*context) )
{
- spin_unlock_irqrestore(&iommu->lock, flags);
- gdprintk(XENLOG_INFO VTDPREFIX,
+ gdprintk(XENLOG_WARNING VTDPREFIX,
"domain_context_unmap_one-%x:%x:%x- "
"context NOT present:return\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
gdprintk(XENLOG_INFO VTDPREFIX,
- "domain_context_unmap_one_1:bdf = %x:%x:%x\n",
+ "domain_context_unmap_one: bdf = %x:%x:%x\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ spin_lock_irqsave(&iommu->lock, flags);
context_clear_present(*context);
context_clear_entry(*context);
iommu_flush_cache_entry(iommu, context);
iommu_flush_iotlb_global(iommu, 0);
spin_unlock_irqrestore(&iommu->lock, flags);
- gdprintk(XENLOG_INFO VTDPREFIX,
- "domain_context_unmap_one_2:bdf = %x:%x:%x\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
return 0;
}
"domain_context_unmap:PCI: bdf = %x:%x:%x\n",
pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
if ( pdev->bus == 0 )
- {
ret = domain_context_unmap_one(
domain, iommu,
(u8)(pdev->bus), (u8)(pdev->devfn));
- }
else
{
if ( bus2bridge[pdev->bus].bus != 0 )
- gdprintk(XENLOG_INFO VTDPREFIX,
+ gdprintk(XENLOG_WARNING VTDPREFIX,
"domain_context_mapping:"
"bus2bridge[pdev->bus].bus != 0\n");
(u8)(bus2bridge[pdev->bus].bus),
(u8)(bus2bridge[pdev->bus].devfn));
- /* now map everything behind the PCI bridge */
+ /* Unmap everything behind the PCI bridge */
for ( dev = 0; dev < 32; dev++ )
{
for ( func = 0; func < 8; func++ )
int status;
unsigned long flags;
- gdprintk(XENLOG_ERR VTDPREFIX,
+ gdprintk(XENLOG_INFO VTDPREFIX,
"reassign_device-%x:%x:%x- source = %d target = %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
continue;
- pdev->bus = bus;
- pdev->devfn = devfn;
drhd = acpi_find_matched_drhd_unit(pdev);
iommu = drhd->iommu;
domain_context_unmap(source, iommu, pdev);
if ( list_empty(&acpi_drhd_units) )
return ret;
- dprintk(XENLOG_INFO VTDPREFIX,
- "assign_device: bus = %x dev = %x func = %x\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ gdprintk(XENLOG_INFO VTDPREFIX,
+ "assign_device: bus = %x dev = %x func = %x\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
reassign_device_ownership(dom0, d, bus, devfn);
if ( !hd->pgd )
{
pgd = (struct dma_pte *)alloc_xenheap_page();
- memset((u8*)pgd, 0, PAGE_SIZE);
- if ( !hd->pgd )
- hd->pgd = pgd;
- else /* somebody is fast */
- free_xenheap_page((void *) pgd);
- }
+ if ( !pgd )
+ {
+ spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ gdprintk(XENLOG_ERR VTDPREFIX,
+ "Allocate pgd memory failed!\n");
+ return;
+ }
+ memset(pgd, 0, PAGE_SIZE);
+ hd->pgd = pgd;
+ }
l3e = map_domain_page(p2m_table);
switch ( level )
case VTD_PAGE_TABLE_LEVEL_4: /* Stoakley */
/* We allocate one more page for the top vtd page table. */
pmd = (struct dma_pte *)alloc_xenheap_page();
+ if ( !pmd )
+ {
+ unmap_domain_page(l3e);
+ spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ gdprintk(XENLOG_ERR VTDPREFIX,
+ "Allocate pmd memory failed!\n");
+ return;
+ }
memset((u8*)pmd, 0, PAGE_SIZE);
pte = &pgd[0];
dma_set_pte_addr(*pte, virt_to_maddr(pmd));